[IA64] handle ld.s on guest tr mapped page (VTI)
authorawilliam@xenbuild.aw <awilliam@xenbuild.aw>
Wed, 16 Aug 2006 20:28:57 +0000 (14:28 -0600)
committerawilliam@xenbuild.aw <awilliam@xenbuild.aw>
Wed, 16 Aug 2006 20:28:57 +0000 (14:28 -0600)
Windows does an ld.s on a tr mapped page.
Currently xen/ipf uses tc/vtlb to emulate guest TR,
which may cause a guest ld.s on a tr page to be deferred; this is not correct.
To trap this ld.s instruction, xen/ipf always sets machine dcr.dm=0.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
[Moved cr.dcr restore to only impact vti -> non-vti switch]
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/xen/domain.c
xen/include/asm-ia64/vmx_vcpu.h
xen/include/asm-ia64/vmx_vpd.h

index bab3199dae33435f905b84c379db324896de644a..4d55c62bf677ea0b6538dbfcbfad980fc172ed19 100644 (file)
@@ -195,7 +195,8 @@ vmx_load_all_rr(VCPU *vcpu)
                        (void *)vcpu->domain->shared_info,
                        (void *)vcpu->arch.privregs,
                        (void *)vcpu->arch.vhpt.hash, pal_vaddr );
-       ia64_set_pta(vcpu->arch.arch_vmx.mpta);
+       ia64_set_pta(VMX(vcpu, mpta));
+       ia64_set_dcr(VMX(vcpu, mdcr));
 
        ia64_srlz_d();
        ia64_set_psr(psr);
index b1d7fea4279295fca30ab4c3caa95e087f68b361..4aababfd685dcc93ffea945bf3c383101f03a3b1 100644 (file)
@@ -136,10 +136,18 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
     __ia64_save_fpu(prev->arch._thread.fph);
     __ia64_load_fpu(next->arch._thread.fph);
-    if (VMX_DOMAIN(prev))
-           vmx_save_state(prev);
+    if (VMX_DOMAIN(prev)) {
+       vmx_save_state(prev);
+       if (!VMX_DOMAIN(next)) {
+           /* VMX domains can change the physical cr.dcr.
+            * Restore default to prevent leakage. */
+           ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK
+                          | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_PP
+                          | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
+       }
+    }
     if (VMX_DOMAIN(next))
-           vmx_load_state(next);
+       vmx_load_state(next);
     /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
     prev = ia64_switch_to(next);
 
index 461bc6b710e3f0ca4ab3bdc0ea330dc6b6c953ed..a82e31fb7f940f6b778124a37236e4401e7289e5 100644 (file)
@@ -239,12 +239,13 @@ vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
 {
     u64 mdcr, mask;
     VCPU(vcpu,dcr)=val;
-    /* All vDCR bits will go to mDCR, except for be/pp bit */
+    /* All vDCR bits will go to mDCR, except for be/pp/dm bits */
     mdcr = ia64_get_dcr();
-    mask = IA64_DCR_BE | IA64_DCR_PP;
+    /* Machine dcr.dm masked to handle guest ld.s on tr mapped page */
+    mask = IA64_DCR_BE | IA64_DCR_PP | IA64_DCR_DM;
     mdcr = ( mdcr & mask ) | ( val & (~mask) );
     ia64_set_dcr( mdcr);
-
+    VMX(vcpu, mdcr) = mdcr;
     return IA64_NO_FAULT;
 }
 
index 77d79fb7044293cc20c607f5c1acea4ed7382373..e6a7ee48c53ef9d62783aa9e81d5e1e71be46b98 100644 (file)
@@ -89,6 +89,7 @@ struct arch_vmx_struct {
 //    unsigned long   mrr5;
 //    unsigned long   mrr6;
 //    unsigned long   mrr7;
+    unsigned long   mdcr;
     unsigned long   mpta;
 //    unsigned long   rfi_pfs;
 //    unsigned long   rfi_iip;